# =============================================================================
# R SCRIPT: Frequentist analysis for paper detailed below ---------------------
# TITLE: Changing-state irrelevant speech disrupts visual-verbal but not ------
# visual-spatial serial recall ------------------------------------------------
# AUTHOR(S): Marsh et al. -----------------------------------------------------
# YEAR: 2023 ------------------------------------------------------------------
# =============================================================================

# =============================================================================
# load packages ---------------------------------------------------------------
library(tidyverse)
library(afex)

# =============================================================================
# EXPT. 1: PARTIAL REPLICATION ================================================
# =============================================================================

# =============================================================================
# read in and sort data -------------------------------------------------------

# --- read data and reshape from wide to long format
# The serial-position columns ("sp1" ... "sp7") are gathered into a single
# position / pos_total pair, and all design variables are converted to
# factors so afex::aov_ez treats them categorically.
# NOTE: the intermediate group_by(id) in the original was a no-op — dplyr
# grouping affects neither pivot_longer() nor aov_ez() (afex coerces the
# data to a plain data.frame), so it has been removed.
part_rep_data <- read.csv("data/within_means.csv") %>%
  pivot_longer(cols = starts_with("sp"),
               names_to = "position",
               values_to = "pos_total") %>%
  mutate(id = as.factor(id),
         order = as.factor(order),
         task = as.factor(task),
         sound = as.factor(sound),
         position = as.factor(position))

# =============================================================================
# ANOVA: 3 (sound condition) x 2 (task modality) x 7 (serial position) x ------
# 2 (task order) --------------------------------------------------------------

# --- get data for ANOVA
# The full data set enters the ANOVA unchanged; aov_ez() aggregates by the
# supplied id/within/between factors itself, so the dplyr group_by() that
# was here previously had no effect and has been removed.
first_part_data <- part_rep_data

# --- mixed ANOVA: task, sound and position within-subject, order between
first_part_aov <- aov_ez(id = "id", dv = "pos_total", data = first_part_data,
                         within = c("task", "sound", "position"),
                         between = "order",
                         anova_table = list(correction = "none", es = "pes"))

# =============================================================================
# ANOVA: 3 (sound condition) x 2 (task modality) x 7 (serial position) --------

# --- get data for ANOVA: drop the task-order factor, collapsing the design
# to the three within-subject factors (sound, task, position).
# aov_ez() does its own aggregation by the supplied factors, so the
# previous no-op group_by() has been removed (ungroup() clears any
# grouping inherited from part_rep_data).
second_part_data <- part_rep_data %>%
  ungroup() %>%
  select(-order)

# --- fully within-subject ANOVA
second_part_aov <- aov_ez(id = "id", dv = "pos_total", data = second_part_data,
                          within = c("sound", "task", "position"),
                          anova_table = list(correction = "none", es = "pes"))

# =============================================================================
# ANOVA: 2 (sound condition: steady, changing) x 2 (task modality) ------------

# --- get data for ANOVA: drop the task-order factor and the quiet baseline,
# leaving the steady vs. changing sound contrast.
# `sound != "quiet"` replaces the original `!sound == "quiet"`, which only
# worked because unary `!` binds more loosely than `==` in R; the explicit
# inequality is the idiomatic form. The trailing no-op group_by() was
# removed (aov_ez aggregates by the supplied factors itself).
third_part_data <- part_rep_data %>%
  ungroup() %>%
  select(-order) %>%
  filter(sound != "quiet")

# --- 2 x 2 within-subject ANOVA
third_part_aov <- aov_ez(id = "id", dv = "pos_total", data = third_part_data,
                         within = c("sound", "task"),
                         anova_table = list(correction = "none", es = "pes"))

# =============================================================================
# ANOVA: breakdown of changing-state effect by task modality ------------------

# spatial task ----------------------------------------------------------------

# --- get data for spatial task (steady vs. changing sound only, already
# restricted in third_part_data). The previous group_by(id, sound) was a
# no-op for aov_ez() and has been removed.
cse_spatial_data <- third_part_data %>%
  filter(task == "spatial")

# --- one-way within-subject ANOVA on the changing-state effect
cse_spatial_aov <- aov_ez(id = "id", dv = "pos_total", data = cse_spatial_data,
                          within = "sound",
                          anova_table = list(correction = "none", es = "pes"))

# verbal task -----------------------------------------------------------------

# --- get data for verbal task (steady vs. changing sound only, already
# restricted in third_part_data). The previous group_by(id, sound) was a
# no-op for aov_ez() and has been removed.
cse_verbal_data <- third_part_data %>%
  filter(task == "verbal")

# --- one-way within-subject ANOVA on the changing-state effect
cse_verbal_aov <- aov_ez(id = "id", dv = "pos_total", data = cse_verbal_data,
                         within = "sound",
                         anova_table = list(correction = "none", es = "pes"))

# =============================================================================
# EXPT. 2: NEAR IDENTICAL REPLICATION =========================================
# =============================================================================

# =============================================================================
# read in and sort data -------------------------------------------------------

# --- read data
near_iden_data <- read.csv("data/between_means.csv")

# --- generate a new case ID for the ANOVA: task is manipulated between
# subjects here, so id x task uniquely identifies a case.
# Vectorised paste0() replaces the original row-by-row for-loop, which was
# both slower and unsafe for zero-row data (`1:nrow(...)` yields c(1, 0)
# when nrow is 0).
near_iden_data$aov_id <- paste0(near_iden_data$id, "_", near_iden_data$task)

# --- pivot from wide to long format and convert design variables to
# factors. The pre-pivot group_by(aov_id) in the original was a no-op for
# pivot_longer() and aov_ez(), so it has been removed.
near_iden_data <- near_iden_data %>%
  pivot_longer(cols = starts_with("sp"),
               names_to = "position",
               values_to = "pos_total") %>%
  mutate(aov_id = as.factor(aov_id),
         task = as.factor(task),
         sound = as.factor(sound),
         position = as.factor(position))

# =============================================================================
# ANOVA: 3 (sound condition) x 2 (task modality) x 7 (serial position) --------

# --- get data for ANOVA
# The full data set enters the ANOVA unchanged; aov_ez() aggregates by the
# supplied id/within/between factors itself, so the dplyr group_by() that
# was here previously had no effect and has been removed.
first_near_data <- near_iden_data

# --- mixed ANOVA: sound and position within-subject, task between
first_near_aov <- aov_ez(id = "aov_id", dv = "pos_total",
                         data = first_near_data,
                         within = c("sound", "position"),
                         between = "task",
                         anova_table = list(correction = "none", es = "pes"))

# =============================================================================
# ANOVA: breakdown of changing-state effect by task modality ------------------

# spatial task ----------------------------------------------------------------

# --- get data for spatial task: steady vs. changing sound only.
# The two filter() calls are merged (comma-separated conditions are ANDed),
# `sound != "quiet"` replaces the precedence-dependent `!sound == "quiet"`,
# and the no-op group_by(sound) has been removed (aov_ez aggregates by the
# supplied factors itself).
near_spatial_cse_data <- first_near_data %>%
  filter(task == "spatial", sound != "quiet")

# --- one-way within-subject ANOVA on the changing-state effect
near_spatial_cse_aov <- aov_ez(id = "aov_id", dv = "pos_total",
                               data = near_spatial_cse_data,
                               within = "sound",
                               anova_table = list(correction = "none",
                                                  es = "pes"))

# verbal task -----------------------------------------------------------------

# --- get data for verbal task: steady vs. changing sound only.
# The two filter() calls are merged (comma-separated conditions are ANDed),
# `sound != "quiet"` replaces the precedence-dependent `!sound == "quiet"`,
# and the no-op group_by(sound) has been removed (aov_ez aggregates by the
# supplied factors itself).
near_verbal_cse_data <- first_near_data %>%
  filter(task == "verbal", sound != "quiet")

# --- one-way within-subject ANOVA on the changing-state effect
near_verbal_cse_aov <- aov_ez(id = "aov_id", dv = "pos_total",
                              data = near_verbal_cse_data,
                              within = "sound",
                              anova_table = list(correction = "none",
                                                 es = "pes"))

# =============================================================================
# =============================================================================